x86/HVM: add wrapper for hvm_funcs.set_tsc_offset()
author Jan Beulich <jbeulich@suse.com>
Thu, 19 Jul 2018 07:40:19 +0000 (09:40 +0200)
committer Jan Beulich <jbeulich@suse.com>
Thu, 19 Jul 2018 07:40:19 +0000 (09:40 +0200)
The hook is used in quite a few places, and hence routing all callers
through a wrapper eases subsequent adjustment of how these (indirect)
calls are carried out.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
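
As an aside on the pattern: once every call site goes through a single
static inline wrapper, only that one wrapper has to change if the way the
hook is invoked changes later. A minimal, self-contained sketch of the
idea (simplified struct vcpu and hvm_function_table, and a made-up
demo_set_tsc_offset; not the real Xen definitions):

    /*
     * Hypothetical illustration -- not Xen code.  All callers use the
     * wrapper, so only the wrapper knows how the function-table hook is
     * actually invoked.
     */
    #include <stdint.h>
    #include <stdio.h>

    struct vcpu { uint64_t cache_tsc_offset; };

    struct hvm_function_table {
        void (*set_tsc_offset)(struct vcpu *v, uint64_t offset,
                               uint64_t at_tsc);
    };

    static void demo_set_tsc_offset(struct vcpu *v, uint64_t offset,
                                    uint64_t at_tsc)
    {
        v->cache_tsc_offset = offset;
        printf("offset=%llu at_tsc=%llu\n",
               (unsigned long long)offset, (unsigned long long)at_tsc);
    }

    static struct hvm_function_table hvm_funcs = {
        .set_tsc_offset = demo_set_tsc_offset,
    };

    /* The wrapper: the single place issuing the indirect call. */
    static inline void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset,
                                          uint64_t at_tsc)
    {
        hvm_funcs.set_tsc_offset(v, offset, at_tsc);
    }

    int main(void)
    {
        struct vcpu v = { 0 };

        hvm_set_tsc_offset(&v, 42, 0);   /* callers never touch hvm_funcs */
        return 0;
    }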
xen/arch/x86/hvm/domain.c
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/vmx/vvmx.c
xen/arch/x86/time.c
xen/include/asm-x86/hvm/hvm.h

index ce15ce0470c91f4f740028d9a53a4376cc81f1fa..ae70aaf8f95e04a8bc91e0cb3de797a0410a6afc 100644 (file)
@@ -317,9 +317,9 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
 
     /* Sync AP's TSC with BSP's. */
     v->arch.hvm_vcpu.cache_tsc_offset =
-        v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
-    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
-                             v->domain->arch.hvm_domain.sync_tsc);
+        d->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
+    hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
+                       d->arch.hvm_domain.sync_tsc);
 
     paging_update_paging_modes(v);
 
index f9408e1ee4b3b4a6fb80c325fc14faf14ab38336..4ed24a401d57f7b8541c6a6c99552d6b52888bb3 100644 (file)
@@ -417,7 +417,7 @@ static void hvm_set_guest_tsc_fixed(struct vcpu *v, u64 guest_tsc, u64 at_tsc)
     delta_tsc = guest_tsc - tsc;
     v->arch.hvm_vcpu.cache_tsc_offset = delta_tsc;
 
-    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, at_tsc);
+    hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, at_tsc);
 }
 
 #define hvm_set_guest_tsc(v, t) hvm_set_guest_tsc_fixed(v, t, 0)
@@ -435,7 +435,7 @@ static void hvm_set_guest_tsc_adjust(struct vcpu *v, u64 tsc_adjust)
 {
     v->arch.hvm_vcpu.cache_tsc_offset += tsc_adjust
                             - v->arch.hvm_vcpu.msr_tsc_adjust;
-    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
+    hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
     v->arch.hvm_vcpu.msr_tsc_adjust = tsc_adjust;
 }
 
@@ -3941,8 +3941,8 @@ void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
     /* Sync AP's TSC with BSP's. */
     v->arch.hvm_vcpu.cache_tsc_offset =
         v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
-    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
-                             d->arch.hvm_domain.sync_tsc);
+    hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
+                       d->arch.hvm_domain.sync_tsc);
 
     v->arch.hvm_vcpu.msr_tsc_adjust = 0;
 
index e97db330cfca7396a8921d74d24315d2be98bbd6..918d47df931c59330907500d2038030b5492367d 100644 (file)
@@ -1082,7 +1082,7 @@ static void load_shadow_guest_state(struct vcpu *v)
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
     }
 
-    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
+    hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
 
     vvmcs_to_shadow_bulk(v, ARRAY_SIZE(vmentry_fields), vmentry_fields);
 
@@ -1288,7 +1288,7 @@ static void load_vvmcs_host_state(struct vcpu *v)
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
     }
 
-    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
+    hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
 
     set_vvmcs(v, VM_ENTRY_INTR_INFO, 0);
 }
index c342d0073211c2e0aeef961a2d8480166f7f8766..536449b264ca0abc5879d48f2bd9213b34b9a05e 100644 (file)
@@ -2198,9 +2198,9 @@ void tsc_set_info(struct domain *d,
              * will sync their TSC to BSP's sync_tsc.
              */
             d->arch.hvm_domain.sync_tsc = rdtsc();
-            hvm_funcs.set_tsc_offset(d->vcpu[0],
-                                     d->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset,
-                                     d->arch.hvm_domain.sync_tsc);
+            hvm_set_tsc_offset(d->vcpu[0],
+                               d->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset,
+                               d->arch.hvm_domain.sync_tsc);
         }
     }
 
index 667efa10bcf38bd6a9f30027f03b134ba4e19f5e..1ee273b075ddbfac0620fb5fcb152da95d049fe5 100644 (file)
@@ -347,6 +347,12 @@ static inline void hvm_cpuid_policy_changed(struct vcpu *v)
     hvm_funcs.cpuid_policy_changed(v);
 }
 
+static inline void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset,
+                                      uint64_t at_tsc)
+{
+    hvm_funcs.set_tsc_offset(v, offset, at_tsc);
+}
+
 /*
  * Called to ensure that all guest-specific mappings in a tagged TLB are
  * flushed; does *not* flush Xen's TLB entries, and on processors without a
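
With all callers converted, a later change to how the indirect call is
carried out only needs to touch the wrapper body. Purely as a hypothetical
follow-up (assuming an alternatives-based call helper such as Xen's later
alternative_vcall(); not part of this patch), that adjustment would be
confined to:

    static inline void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset,
                                          uint64_t at_tsc)
    {
        /* Hypothetical: optimized call mechanism replacing the plain
         * hvm_funcs.set_tsc_offset(v, offset, at_tsc) indirect call. */
        alternative_vcall(hvm_funcs.set_tsc_offset, v, offset, at_tsc);
    }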